# Load the raw trial-level data; readr guesses column types (spec echoed below).
d <- read_csv(data_path)
##
## ── Column specification ────────────────────────────────────────────────────────
## cols(
## .default = col_character(),
## trial_index = col_double(),
## time_elapsed = col_double(),
## experiment_id = col_double(),
## survey_code = col_logical(),
## seed = col_double(),
## success = col_logical(),
## timeout = col_logical(),
## rt = col_double(),
## start_time = col_double(),
## end_time = col_double(),
## choice_index = col_double(),
## reward_score = col_double(),
## reward = col_double(),
## reward_score_unadjusted = col_double(),
## score_after_trial = col_double(),
## slider_start = col_double(),
## n = col_double()
## )
## ℹ Use `spec()` for the full column specifications.
# Drop the reward-display screens; they carry no choice data.
d <- filter(d, !(trial_type %in% "show-reward"))
# Adding columns to characterize participant choices.
# Derive trial counters and choice descriptors from the raw trial_index.
# NOTE(review): the index arithmetic below encodes the task's screen layout
# (8 intro screens, then test screens alternating with the removed
# "show-reward" screens up to index 199) — confirm against the task script.
d <- d %>%
mutate(
# Collapsed trial counter: 1:1 for the first 8 indices; one trial per two raw
# indices through 198 (choice + removed feedback screen); later indices
# shifted down by the 96 removed screens.
trial_number = case_when(
trial_index<8 ~ trial_index,
trial_index<199 ~ 7+(trial_index-7)/2,
TRUE ~ trial_index-96
)
) %>%
relocate(trial_number,.after=trial_index) %>%
mutate(
# Test trials span trial_number 7..102, renumbered 1..96; NA elsewhere.
test_trial_number = case_when(
trial_number<7 ~ NA_real_,
trial_number<103 ~ trial_number-6,
TRUE ~ NA_real_
)
) %>%
relocate(test_trial_number,.after=trial_number) %>%
mutate(
# Trial number within each 48-trial block (1..48), plus a centered copy
# (block midpoint 24.5 -> 0) for use as a regression covariate.
block_trial_number = case_when(
test_trial_number < 49 ~ test_trial_number,
TRUE ~ test_trial_number - 48),
block_trial_number_c = block_trial_number - 24.5
) %>%
relocate(block_trial_number,.after=test_trial_number) %>%
relocate(block_trial_number_c,.after=block_trial_number) %>%
mutate(
# 8-trial bins across the 96 test trials (1..12), for block-wise plots.
explore_block = case_when(
test_trial_number<9 ~ 1,
test_trial_number<17 ~ 2,
test_trial_number<25 ~ 3,
test_trial_number<33 ~ 4,
test_trial_number < 41 ~ 5,
test_trial_number < 49 ~ 6,
test_trial_number < 57 ~ 7,
test_trial_number<65 ~ 8,
test_trial_number<73 ~ 9,
test_trial_number<81 ~ 10,
test_trial_number <89 ~ 11,
test_trial_number <97 ~ 12,
TRUE ~ NA_real_
)
) %>%
mutate(
# 1 when the chosen option carried the maximum unadjusted reward (8),
# 0 for any other test-trial choice, NA outside the test phase.
max_reward_choice = case_when(
reward_score_unadjusted ==8 ~ 1,
!is.na(test_trial_number) ~ 0,
TRUE ~ NA_real_
)
) %>%
mutate(
# Structure condition governing the CURRENT block: unchanged through the
# learning block (trials 1-48) and for "match" participants; flipped
# (emotion <-> model) in the generalization block for mismatch participants.
cur_structure_condition=case_when(
test_trial_number < 49 ~ structure_condition,
!is.na(test_trial_number) & match_condition == "match" ~ structure_condition,
test_trial_number >= 49 & structure_condition == "emotion" ~ "model",
test_trial_number >= 49 & structure_condition == "model" ~ "emotion"
)
) %>%
# Block 1 = learning (trials 1-48), block 2 = generalization (49-96).
mutate(block = case_when(
test_trial_number < 49 ~ 1,
test_trial_number >= 49 ~ 2,
TRUE ~ NA_real_
))
#recenter vars
# Effect-coded (+/-0.5) and dummy-coded versions of the design factors, for
# the mixed models below. The dummy codings (_model/_emotion/_match/_mismatch,
# block_learn/block_gen) re-reference the model so simple effects can be read
# off at each factor level.
d <- d %>%
mutate(
# +/-0.5 centered codes (emotion/match = +0.5).
structure_condition_c = case_when(
structure_condition == "model" ~ -0.5,
structure_condition == "emotion" ~ 0.5),
cur_structure_condition_c = case_when(
cur_structure_condition == "model" ~ -0.5,
cur_structure_condition == "emotion" ~ 0.5),
match_condition_c = case_when(
match_condition == "match" ~ 0.5,
match_condition == "mismatch" ~ -0.5
),
# Dummy codes with "model" / "emotion" as the reference level, respectively.
cur_structure_condition_model = case_when(
cur_structure_condition == "model" ~ 0,
cur_structure_condition == "emotion" ~ 1),
cur_structure_condition_emotion = case_when(
cur_structure_condition == "model" ~ -1,
cur_structure_condition == "emotion" ~ 0),
# Dummy codes with "match" / "mismatch" as the reference level, respectively.
match_condition_match = case_when(
match_condition == "match" ~ 0,
match_condition == "mismatch" ~ -1
),
match_condition_mismatch = case_when(
match_condition == "match" ~ 1,
match_condition == "mismatch" ~ 0
),
# NOTE(review): rows outside the test phase (NA test_trial_number) fall into
# the TRUE branch and get block_c = 0.5, unlike `block` which is NA there —
# harmless for the glmers (those rows have NA max_reward_choice and are
# dropped), but confirm this is intended.
block_c = case_when(
test_trial_number < 49 ~ -0.5,
TRUE ~ 0.5
),
# Block dummies referenced at the learning (block_learn) and generalization
# (block_gen) block, respectively.
block_learn = case_when(
block==1 ~ 0,
block==2 ~ 1
),
block_gen = case_when(
block==1 ~ -1,
block==2 ~ 0
)
)
# Free-text survey answers from the final screen (index 206), split out of the
# raw JSON-like response string into its three fields.
open_resps <- d %>%
  filter(trial_index == 206) %>%
  select(subject, structure_condition, match_condition, response) %>%
  extract(
    response,
    into = c("patterns", "strategy", "comments"),
    regex = "patterns\":\"(.*)\",\"strategy\":\"(.*)\",\"comments\":\"(.*)"
  )
#write_csv(open_resps, here("data-analysis","data","v1","processed","emogo-v1-openresponses.csv"))
# Score the two attention-check trials (indices 4 and 5): the correct picks
# are the horse and hammer images, respectively.
attention_check <- d %>%
  filter(trial_index == 4 | trial_index == 5) %>%
  mutate(
    attention_check_correct_choice = if_else(
      trial_index == 4,
      "stimuli/horse.jpg",
      "stimuli/hammer.jpg"
    ),
    check_correct = as.numeric(attention_check_correct_choice == choiceImage)
  ) %>%
  group_by(subject) %>%
  summarize(
    N = n(),
    avg_correct = mean(check_correct)
  )
# Subjects who answered every attention check correctly.
passed_attention_check <- subset(attention_check, avg_correct == 1)$subject
# Total session duration per subject, converted from milliseconds to minutes.
total_time <- d %>%
  filter(trial_index == 206) %>%
  distinct(subject, time_elapsed) %>%
  mutate(time_mins = time_elapsed / (1000 * 60))
# Minimum completion time (minutes).
min(total_time$time_mins)
## [1] 5.328617
# Flag any subject who finished in under 4 minutes.
subjects_too_fast <- filter(total_time, time_mins < 4)
subjects_too_fast$subject
## character(0)
# Per-subject frequency of each response location across the test trials
# (used to catch subjects mashing one position).
percent_location_selections <- d %>%
  filter(!is.na(test_trial_number)) %>%
  group_by(subject, choiceLocation) %>%
  tally() %>%
  mutate(freq = n / sum(n))
## `summarise()` has grouped output by 'subject'. You can override using the
## `.groups` argument.
# Exclusion: subjects choosing a single location on more than 80% of trials.
subjects_same_location_exclusion <- filter(percent_location_selections, freq > 0.8)
unique(subjects_same_location_exclusion$subject)
## [1] "p662117" "p748221"
# Final cumulative score per subject (last test trial, attention-passers only),
# used below to rank participants by total reward earned.
reward_rank <- d %>%
filter(subject %in% passed_attention_check) %>%
filter(test_trial_number==96) %>%
select(subject,structure_condition,match_condition,score_after_trial)
# Median final score across subjects (plotted as a reference line below).
median_score <- median(reward_rank$score_after_trial)
# Histogram of final scores; the vertical line marks the median.
ggplot(reward_rank) +
  aes(x = score_after_trial) +
  geom_histogram() +
  geom_vline(xintercept = median_score)
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
# Final-score densities by match condition, faceted by structure condition.
ggplot(reward_rank) +
  aes(x = score_after_trial, color = match_condition) +
  geom_density() +
  facet_wrap(~structure_condition)
# Keep the 50 highest-scoring subjects.
# FIX: the original called arrange(desc(reward_rank)) — `reward_rank` is the
# data frame itself, not a column — so rows were never ranked by score.
# Rank by each subject's final score instead.
subjects_top_50 <- reward_rank %>%
  arrange(desc(score_after_trial)) %>%
  slice_head(n = 50)
write_csv(subjects_top_50,here("data-analysis","data","spark-psychopathology","processed","subjects_top_50.csv"))
# Condition breakdown of the top-50 scorers (checks for condition imbalance).
conditions_top_50 <- reward_rank %>%
filter(subject %in% subjects_top_50$subject) %>%
group_by(structure_condition,match_condition) %>%
tally()
conditions_top_50
## # A tibble: 4 × 3
## # Groups: structure_condition [2]
## structure_condition match_condition n
## <chr> <chr> <int>
## 1 emotion match 4
## 2 emotion mismatch 20
## 3 model match 14
## 4 model mismatch 12
#exclude any participants who meet exclusion criteria
# FIX: `subjects_same_location_exclusion` and `subjects_too_fast` are data
# frames, so `subject %in% <data frame>` matched nothing and those two
# exclusions were silently skipped. Compare against their subject columns.
# (`passed_attention_check` is already a character vector via pull().)
d <- d %>%
  filter(subject %in% passed_attention_check) %>%
  filter(!(subject %in% subjects_same_location_exclusion$subject)) %>%
  filter(!(subject %in% subjects_too_fast$subject))
# Post-exclusion cell counts: one row per subject, tallied by condition.
d %>%
distinct(subject,structure_condition,match_condition) %>%
group_by(structure_condition,match_condition) %>%
tally()
## # A tibble: 4 × 3
## # Groups: structure_condition [2]
## structure_condition match_condition n
## <chr> <chr> <int>
## 1 emotion match 83
## 2 emotion mismatch 83
## 3 model match 78
## 4 model mismatch 81
# Per-subject proportion of reward-maximizing choices in each 8-trial block.
subject_by_block <- d %>%
filter(!is.na(explore_block)) %>%
group_by(subject,match_condition,structure_condition,explore_block) %>%
summarize(
max_choice_percent=mean(max_reward_choice)
)
## `summarise()` has grouped output by 'subject', 'match_condition',
## 'structure_condition'. You can override using the `.groups` argument.
# Group mean, SE, and 95% t-based CI of max-choice rates per 8-trial block.
# NOTE(review): N = n() counts all subject rows; se/ci use na.rm on the
# spread but N is not reduced for NAs — confirm no NA max_choice_percent.
summarize_by_block <- subject_by_block %>%
group_by(explore_block) %>%
summarize(
N=n(),
max_choice = mean(max_choice_percent),
se = sqrt(var(max_choice_percent, na.rm = TRUE)/N),
ci=qt(0.975, N-1)*sd(max_choice_percent,na.rm=TRUE)/sqrt(N),
lower_ci=max_choice-ci,
upper_ci=max_choice+ci,
lower_se=max_choice-se,
upper_se=max_choice+se
)
# Same block-level summary as above, but split by the two design factors.
summarize_by_block_by_condition <- subject_by_block %>%
group_by(match_condition,structure_condition,explore_block) %>%
summarize(
N=n(),
max_choice = mean(max_choice_percent),
se = sqrt(var(max_choice_percent, na.rm = TRUE)/N),
ci=qt(0.975, N-1)*sd(max_choice_percent,na.rm=TRUE)/sqrt(N),
lower_ci=max_choice-ci,
upper_ci=max_choice+ci,
lower_se=max_choice-se,
upper_se=max_choice+se
)
## `summarise()` has grouped output by 'match_condition', 'structure_condition'.
## You can override using the `.groups` argument.
# Proportion of choices landing on each reward tier (8/6/4/2) per test trial
# and condition, reshaped long (one row per trial x reward tier) for plotting.
summarize_choice_by_trial <- d %>%
filter(!is.na(test_trial_number)) %>%
group_by(structure_condition, match_condition,test_trial_number) %>%
summarize(
N=n(),
reward_8 = mean(reward_score_unadjusted==8),
reward_6 = mean(reward_score_unadjusted==6),
reward_4 = mean(reward_score_unadjusted==4),
reward_2 = mean(reward_score_unadjusted==2)
) %>%
pivot_longer(cols = c(reward_8,reward_6,reward_4,reward_2),names_to = "reward",values_to = "percent_choice",names_prefix="reward_")
## `summarise()` has grouped output by 'structure_condition', 'match_condition'.
## You can override using the `.groups` argument.
# Same reward-tier choice proportions as above, aggregated to 8-trial blocks.
summarize_choice_by_block <- d %>%
filter(!is.na(test_trial_number)) %>%
group_by(structure_condition, match_condition,explore_block) %>%
summarize(
N=n(),
reward_8 = mean(reward_score_unadjusted==8),
reward_6 = mean(reward_score_unadjusted==6),
reward_4 = mean(reward_score_unadjusted==4),
reward_2 = mean(reward_score_unadjusted==2)
) %>%
pivot_longer(cols = c(reward_8,reward_6,reward_4,reward_2),names_to = "reward",values_to = "percent_choice",names_prefix="reward_")
## `summarise()` has grouped output by 'structure_condition', 'match_condition'.
## You can override using the `.groups` argument.
# Per-subject rate of reward-maximizing (tier-8) choices in each block,
# grouped by the CURRENT structure condition (which flips for mismatch
# subjects in block 2).
summarize_choice_by_subject <- d %>%
filter(!is.na(test_trial_number)) %>%
group_by(subject,cur_structure_condition, match_condition,block) %>%
summarize(
N=n(),
reward_8 = mean(reward_score_unadjusted==8)
) %>%
mutate(
block_name = case_when(
block==2 ~ "generalization block",
block==1~"learning block"
)
)
## `summarise()` has grouped output by 'subject', 'cur_structure_condition',
## 'match_condition'. You can override using the `.groups` argument.
# Order facets learning -> generalization.
summarize_choice_by_subject$block_name <- factor(summarize_choice_by_subject$block_name, levels=c("learning block","generalization block"))
# Per-subject block trajectories (faint) with the group mean +/- SE overlaid.
# FIX: ggplot2 >= 3.4 deprecates the `size` aesthetic for line geoms (see the
# warning this chunk emitted); use `linewidth` for geom_line instead.
ggplot(subject_by_block,aes(explore_block,max_choice_percent))+
  geom_point(size=1.5,alpha=0.1,aes(group=subject))+
  geom_line(alpha=0.1,aes(group=subject))+
  geom_point(data=summarize_by_block,aes(y=max_choice),size=2,color="black")+
  geom_line(data=summarize_by_block,aes(y=max_choice),linewidth=1.2,color="black")+
  geom_errorbar(data=summarize_by_block,aes(y=max_choice,ymin=lower_se,ymax=upper_se),width=0,color="black")+
  geom_vline(xintercept=6.5,linetype="dotted")+
  geom_hline(yintercept=0.25,linetype="dashed")+
  theme(legend.position="none")+
  scale_x_continuous(breaks=seq(1,12))+
  xlab("Block (8 Trials = 1 Block)")+
  ylab("Percent reward-maximizing choices")
## Warning: Using `size` aesthetic for lines was deprecated in ggplot2 3.4.0.
## Warning: Please use `linewidth` instead.
# Condition-level block means +/- SE across the 12 blocks.
# FIX: ggplot2 >= 3.4 deprecates the `size` aesthetic for line geoms; use
# `linewidth` for geom_line instead.
ggplot(summarize_by_block_by_condition,aes(explore_block,max_choice, color=structure_condition,shape=match_condition,linetype=match_condition))+
  geom_point(size=1.5,alpha=0.5)+
  geom_line(alpha=0.5)+
  geom_point(aes(y=max_choice),size=2)+
  geom_line(aes(y=max_choice),linewidth=1.2)+
  geom_errorbar(aes(y=max_choice,ymin=lower_se,ymax=upper_se),width=0)+
  geom_vline(xintercept=6.5,linetype="dotted")+
  geom_hline(yintercept=0.25,linetype="dashed")+
  #theme(legend.position="none")+
  scale_x_continuous(breaks=seq(1,12))+
  xlab("Block (8 Trials = 1 Block)")+
  ylab("Percent reward-maximizing choices")
# Spaghetti plot of individual trajectories, faceted by condition cell.
ggplot(subject_by_block, aes(explore_block, max_choice_percent, group = subject)) +
  #geom_point(size=1.5,alpha=0.5)+
  geom_line(alpha = 0.5) +
  geom_vline(xintercept = 6.5, linetype = "dotted") +
  geom_hline(yintercept = 0.25, linetype = "dashed") +
  theme(legend.position = "none") +
  facet_wrap(~structure_condition + match_condition) +
  scale_x_continuous(breaks = seq(1, 12)) +
  labs(x = "Block (8 Trials = 1 Block)", y = "Percent reward-maximizing choices")
# Reward-tier choice proportions over test trials; dotted line marks the
# learning/generalization boundary, dashed line marks chance (0.25).
ggplot(summarize_choice_by_trial, aes(test_trial_number, percent_choice, color = reward)) +
  geom_point() +
  geom_line() +
  geom_vline(xintercept = 48.5, linetype = "dotted") +
  geom_hline(yintercept = 0.25, linetype = "dashed") +
  facet_wrap(~structure_condition + match_condition) +
  labs(x = "Trial Number", y = "Percent choices")
# Reward-tier choice proportions per 8-trial block, by condition cell.
ggplot(summarize_choice_by_block, aes(explore_block, percent_choice, color = reward)) +
  geom_point() +
  geom_line() +
  geom_vline(xintercept = 6.5, linetype = "dotted") +
  geom_hline(yintercept = 0.25, linetype = "dashed") +
  facet_wrap(~structure_condition + match_condition) +
  labs(x = "Block (8 Trials = 1 Block)", y = "Percent choices")
# Subject-level reward-maximizing rates by current structure condition,
# faceted by match condition and block; dashed line marks chance.
ggplot(summarize_choice_by_subject, aes(cur_structure_condition, reward_8, color = cur_structure_condition)) +
  geom_boxplot() +
  geom_jitter(width = 0.1) +
  geom_hline(yintercept = 0.25, linetype = "dashed") +
  facet_wrap(~match_condition + block_name) +
  labs(x = "Structure Condition", y = "Percent Reward-Maximizing Choices") +
  theme_cowplot() +
  theme(legend.position = "none")
ggsave(here(figure_path, "overall_reward_maximizing.png"), width = 6, height = 6)
#### Pruning random effects structure
#maximal random effects structure
# Logistic mixed model: reward-maximizing choice ~ full factorial of the
# centered design codes, with by-subject random slopes for the within-subject
# block terms and a random intercept per choice image. bobyqa + raised
# maxfun to help convergence with this random-effects structure.
m <- glmer(max_reward_choice ~ cur_structure_condition_c*match_condition_c*block_c*block_trial_number_c + (1+block_c*block_trial_number_c|subject)+(1|choiceImage),data=d, family=binomial,glmerControl(optimizer="bobyqa",optCtrl=list(maxfun=100000)))
summary(m)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: max_reward_choice ~ cur_structure_condition_c * match_condition_c *
## block_c * block_trial_number_c + (1 + block_c * block_trial_number_c |
## subject) + (1 | choiceImage)
## Data: d
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))
##
## AIC BIC logLik deviance df.resid
## 24336.0 24561.4 -12141.0 24282.0 31173
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -22.0141 -0.3824 0.0179 0.3535 12.0665
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## subject (Intercept) 3.90192 1.97533
## block_c 10.76428 3.28090 0.11
## block_trial_number_c 0.00464 0.06812 0.86 0.28
## block_c:block_trial_number_c 0.01065 0.10321 0.23 0.78 0.36
## choiceImage (Intercept) 6.85526 2.61825
## Number of obs: 31200, groups: subject, 325; choiceImage, 248
##
## Fixed effects:
## Estimate
## (Intercept) -0.189100
## cur_structure_condition_c -0.921282
## match_condition_c 0.632949
## block_c 0.520692
## block_trial_number_c 0.071770
## cur_structure_condition_c:match_condition_c -0.345643
## cur_structure_condition_c:block_c -1.580452
## match_condition_c:block_c 0.961556
## cur_structure_condition_c:block_trial_number_c -0.042275
## match_condition_c:block_trial_number_c 0.014365
## block_c:block_trial_number_c 0.027427
## cur_structure_condition_c:match_condition_c:block_c -1.835483
## cur_structure_condition_c:match_condition_c:block_trial_number_c -0.017331
## cur_structure_condition_c:block_c:block_trial_number_c -0.054509
## match_condition_c:block_c:block_trial_number_c 0.036989
## cur_structure_condition_c:match_condition_c:block_c:block_trial_number_c -0.028463
## Std. Error
## (Intercept) 0.205779
## cur_structure_condition_c 0.210496
## match_condition_c 0.226750
## block_c 0.195488
## block_trial_number_c 0.004310
## cur_structure_condition_c:match_condition_c 0.419420
## cur_structure_condition_c:block_c 0.419879
## match_condition_c:block_c 0.381567
## cur_structure_condition_c:block_trial_number_c 0.007690
## match_condition_c:block_trial_number_c 0.008401
## block_c:block_trial_number_c 0.007055
## cur_structure_condition_c:match_condition_c:block_c 0.832402
## cur_structure_condition_c:match_condition_c:block_trial_number_c 0.015259
## cur_structure_condition_c:block_c:block_trial_number_c 0.015305
## match_condition_c:block_c:block_trial_number_c 0.013550
## cur_structure_condition_c:match_condition_c:block_c:block_trial_number_c 0.030299
## z value
## (Intercept) -0.919
## cur_structure_condition_c -4.377
## match_condition_c 2.791
## block_c 2.664
## block_trial_number_c 16.652
## cur_structure_condition_c:match_condition_c -0.824
## cur_structure_condition_c:block_c -3.764
## match_condition_c:block_c 2.520
## cur_structure_condition_c:block_trial_number_c -5.497
## match_condition_c:block_trial_number_c 1.710
## block_c:block_trial_number_c 3.888
## cur_structure_condition_c:match_condition_c:block_c -2.205
## cur_structure_condition_c:match_condition_c:block_trial_number_c -1.136
## cur_structure_condition_c:block_c:block_trial_number_c -3.562
## match_condition_c:block_c:block_trial_number_c 2.730
## cur_structure_condition_c:match_condition_c:block_c:block_trial_number_c -0.939
## Pr(>|z|)
## (Intercept) 0.358123
## cur_structure_condition_c 1.20e-05
## match_condition_c 0.005248
## block_c 0.007732
## block_trial_number_c < 2e-16
## cur_structure_condition_c:match_condition_c 0.409884
## cur_structure_condition_c:block_c 0.000167
## match_condition_c:block_c 0.011735
## cur_structure_condition_c:block_trial_number_c 3.85e-08
## match_condition_c:block_trial_number_c 0.087282
## block_c:block_trial_number_c 0.000101
## cur_structure_condition_c:match_condition_c:block_c 0.027451
## cur_structure_condition_c:match_condition_c:block_trial_number_c 0.256021
## cur_structure_condition_c:block_c:block_trial_number_c 0.000369
## match_condition_c:block_c:block_trial_number_c 0.006338
## cur_structure_condition_c:match_condition_c:block_c:block_trial_number_c 0.347529
##
## (Intercept)
## cur_structure_condition_c ***
## match_condition_c **
## block_c **
## block_trial_number_c ***
## cur_structure_condition_c:match_condition_c
## cur_structure_condition_c:block_c ***
## match_condition_c:block_c *
## cur_structure_condition_c:block_trial_number_c ***
## match_condition_c:block_trial_number_c .
## block_c:block_trial_number_c ***
## cur_structure_condition_c:match_condition_c:block_c *
## cur_structure_condition_c:match_condition_c:block_trial_number_c
## cur_structure_condition_c:block_c:block_trial_number_c ***
## match_condition_c:block_c:block_trial_number_c **
## cur_structure_condition_c:match_condition_c:block_c:block_trial_number_c
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation matrix not shown by default, as p = 16 > 12.
## Use print(x, correlation=TRUE) or
## vcov(x) if you need it
#create predicted data
# Fixed-effects-only predictions (re.form=NA) over the full design grid;
# predictSE (AICcmodavg-style) returns fit and se.fit on the response scale.
pX <- expand.grid(
cur_structure_condition_c=c(-0.5,0.5),
match_condition_c=c(-0.5,0.5),
block_c=c(-0.5,0.5),
block_trial_number_c=seq(-23.5,23.5,by=1))
predictions <- predictSE(m,pX,re.form=NA, type="response")
pX$fit <- predictions$fit
pX$se.fit <- predictions$se.fit
# Map the centered codes back to their labels so pX can be plotted alongside d.
pX <- pX %>%
mutate(
block_trial_number = block_trial_number_c+24.5,
cur_structure_condition = case_when(
cur_structure_condition_c==0.5 ~ "emotion",
cur_structure_condition_c==-0.5 ~ "model"
),
match_condition=case_when(
match_condition_c==0.5 ~ "match",
match_condition_c==-0.5 ~ "mismatch"
),
block_name = case_when(
block_c==0.5 ~ "generalization block",
block_c==-0.5~"learning block"
)
) %>%
mutate(
block_name=factor(block_name,levels=c("learning block","generalization block"))
)
# Human-readable block labels on d, ordered learning -> generalization so
# facets render in task order.
d <- d %>%
  mutate(
    block_name = case_when(
      block == 1 ~ "learning block",
      block == 2 ~ "generalization block"
    ),
    block_name = factor(block_name, levels = c("learning block", "generalization block"))
  )
# Model-predicted probability of a reward-maximizing choice (ribbon) overlaid
# on the raw 0/1 choices (jittered points + count-scaled half violins).
# FIX: `cur_structure_condition` mapped to color/fill is a discrete
# ("model"/"emotion") variable, so legend breaks must be those strings; the
# original numeric breaks c(0.5,-0.5) matched no level and yielded an empty
# legend (the later plot at the generalization-only figure uses strings).
p <- ggplot(subset(d,!is.na(block_trial_number)),aes(block_trial_number,as.factor(max_reward_choice),color=cur_structure_condition))+
geom_point(size = 0.5, alpha=0.2,shape=19,position = position_jitterdodge(jitter.width = 0.05,jitter.height = 0.5,dodge.width = 0.2,seed = 1))+
geom_violinh(data=subset(d,!is.na(block_trial_number)&cur_structure_condition_c==-0.5),aes(fill=cur_structure_condition),position = position_nudge(x = 0, y = .3 ),scale="count",width=0.4,alpha=0.3,color=NA)+
geom_violinh(data=subset(d,!is.na(block_trial_number)&cur_structure_condition_c==0.5),aes(fill=cur_structure_condition),position = position_nudge(x = 0, y = -.3 ),scale="count",width=0.4,alpha=0.3,color=NA)+
geom_hline(yintercept=0.25*4+1,linetype="dashed")+
geom_smooth(data=pX,aes(y=fit*4+1,ymax=(fit+se.fit)*4+1,ymin=(fit-se.fit)*4+1,fill=cur_structure_condition),stat="identity")+
theme_classic(base_size=18)+
ylab("Probability of \nreward-maximizing choice")+
scale_color_brewer(
palette="Set1",
name="Structure Condition",
breaks=c("emotion","model"),
labels=c("Emotion","Model"))+
scale_fill_brewer(
palette="Set1",
name="Structure Condition",
breaks=c("emotion","model"),
labels=c("Emotion","Model"))+
scale_y_discrete(limits=c("0","0.25","0.5","0.75","1"))+
xlab("Block Trial Number")+
facet_wrap(~match_condition+block_name)+
theme(legend.position=c(0.4,0.4))
p
## Warning: Using the `size` aesthetic with geom_polygon was deprecated in ggplot2
## 3.4.0.
## Warning: Please use the `linewidth` aesthetic instead.
ggsave(here(figure_path,"model_predictions.png"),width=9,height=6)
# Same prediction-over-raw-data figure as above, restricted to the
# generalization block (block 2) only.
p <- ggplot(subset(d,!is.na(block_trial_number)&block==2),aes(block_trial_number,as.factor(max_reward_choice),color=cur_structure_condition))+
geom_point(size = 0.5, alpha=0.1,shape=19,position = position_jitterdodge(jitter.width = 0.05,jitter.height = 0.5,dodge.width = 0.2,seed = 1))+
geom_violinh(data=subset(d,!is.na(block_trial_number)&cur_structure_condition_c==-0.5&block==2),aes(fill=cur_structure_condition),position = position_nudge(x = 0, y = .3 ),scale="count",width=0.4,alpha=0.2,color=NA)+
geom_violinh(data=subset(d,!is.na(block_trial_number)&cur_structure_condition_c==0.5&block==2),aes(fill=cur_structure_condition),position = position_nudge(x = 0, y = -.3 ),scale="count",width=0.4,alpha=0.2,color=NA)+
geom_hline(yintercept=0.25*4+1,linetype="dashed")+
geom_smooth(data=filter(pX,block_name=="generalization block"),aes(y=fit*4+1,ymax=(fit+se.fit)*4+1,ymin=(fit-se.fit)*4+1,fill=cur_structure_condition),stat="identity")+
theme_classic(base_size=18)+
ylab("Probability of \nreward-maximizing choice")+
scale_color_brewer(
palette="Set1",
name="Structure Condition",
breaks=c("model","emotion"),
labels=c("Model","Emotion"))+
scale_fill_brewer(
palette="Set1",
name="Structure Condition",
breaks=c("model","emotion"),
labels=c("Model","Emotion"))+
scale_y_discrete(limits=c("0","0.25","0.5","0.75","1"))+
xlab("Block Trial Number")+
facet_wrap(~match_condition)+
theme(legend.position=c(0.39,0.45),legend.title=element_text(size=14),legend.text=element_text(size=12),legend.background=element_rect(fill =NA))
p
ggsave(here(figure_path,"model_prediction_generalization_only.pdf"),width=9,height=6)
# Recenter the model on the learning block
# Same model with the block factor dummy-coded at the learning block
# (block_learn = 0 there), so the lower-order terms are simple effects
# WITHIN the learning block.
m <- glmer(max_reward_choice ~ cur_structure_condition_c*match_condition_c*block_learn*block_trial_number_c+ (1+block_learn*block_trial_number_c|subject)+(1|choiceImage),data=d, family=binomial,glmerControl(optimizer="bobyqa",optCtrl=list(maxfun=100000)))
summary(m)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: max_reward_choice ~ cur_structure_condition_c * match_condition_c *
## block_learn * block_trial_number_c + (1 + block_learn * block_trial_number_c |
## subject) + (1 | choiceImage)
## Data: d
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))
##
## AIC BIC logLik deviance df.resid
## 24336.0 24561.4 -12141.0 24282.0 31173
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -22.0146 -0.3824 0.0179 0.3535 12.0664
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## subject (Intercept) 5.88593 2.42609
## block_learn 10.76446 3.28092 -0.59
## block_trial_number_c 0.00479 0.06921 0.76 -0.31
## block_learn:block_trial_number_c 0.01065 0.10322 -0.34 0.78
## choiceImage (Intercept) 6.85520 2.61824
##
##
##
##
## -0.39
##
## Number of obs: 31200, groups: subject, 325; choiceImage, 248
##
## Fixed effects:
## Estimate
## (Intercept) -0.449467
## cur_structure_condition_c -0.131087
## match_condition_c 0.152142
## block_learn 0.520745
## block_trial_number_c 0.058056
## cur_structure_condition_c:match_condition_c 0.571892
## cur_structure_condition_c:block_learn -1.580437
## match_condition_c:block_learn 0.961566
## cur_structure_condition_c:block_trial_number_c -0.015022
## match_condition_c:block_trial_number_c -0.004131
## block_learn:block_trial_number_c 0.027429
## cur_structure_condition_c:match_condition_c:block_learn -1.835420
## cur_structure_condition_c:match_condition_c:block_trial_number_c -0.003105
## cur_structure_condition_c:block_learn:block_trial_number_c -0.054509
## match_condition_c:block_learn:block_trial_number_c 0.036990
## cur_structure_condition_c:match_condition_c:block_learn:block_trial_number_c -0.028464
## Std. Error
## (Intercept) 0.220839
## cur_structure_condition_c 0.277346
## match_condition_c 0.277131
## block_learn 0.195520
## block_trial_number_c 0.004504
## cur_structure_condition_c:match_condition_c 0.549545
## cur_structure_condition_c:block_learn 0.419312
## match_condition_c:block_learn 0.381913
## cur_structure_condition_c:block_trial_number_c 0.008844
## match_condition_c:block_trial_number_c 0.008838
## block_learn:block_trial_number_c 0.007056
## cur_structure_condition_c:match_condition_c:block_learn 0.828020
## cur_structure_condition_c:match_condition_c:block_trial_number_c 0.017596
## cur_structure_condition_c:block_learn:block_trial_number_c 0.015293
## match_condition_c:block_learn:block_trial_number_c 0.013558
## cur_structure_condition_c:match_condition_c:block_learn:block_trial_number_c 0.030222
## z value
## (Intercept) -2.035
## cur_structure_condition_c -0.473
## match_condition_c 0.549
## block_learn 2.663
## block_trial_number_c 12.890
## cur_structure_condition_c:match_condition_c 1.041
## cur_structure_condition_c:block_learn -3.769
## match_condition_c:block_learn 2.518
## cur_structure_condition_c:block_trial_number_c -1.699
## match_condition_c:block_trial_number_c -0.467
## block_learn:block_trial_number_c 3.888
## cur_structure_condition_c:match_condition_c:block_learn -2.217
## cur_structure_condition_c:match_condition_c:block_trial_number_c -0.176
## cur_structure_condition_c:block_learn:block_trial_number_c -3.564
## match_condition_c:block_learn:block_trial_number_c 2.728
## cur_structure_condition_c:match_condition_c:block_learn:block_trial_number_c -0.942
## Pr(>|z|)
## (Intercept) 0.041824
## cur_structure_condition_c 0.636464
## match_condition_c 0.583013
## block_learn 0.007736
## block_trial_number_c < 2e-16
## cur_structure_condition_c:match_condition_c 0.298032
## cur_structure_condition_c:block_learn 0.000164
## match_condition_c:block_learn 0.011810
## cur_structure_condition_c:block_trial_number_c 0.089404
## match_condition_c:block_trial_number_c 0.640221
## block_learn:block_trial_number_c 0.000101
## cur_structure_condition_c:match_condition_c:block_learn 0.026648
## cur_structure_condition_c:match_condition_c:block_trial_number_c 0.859953
## cur_structure_condition_c:block_learn:block_trial_number_c 0.000365
## match_condition_c:block_learn:block_trial_number_c 0.006366
## cur_structure_condition_c:match_condition_c:block_learn:block_trial_number_c 0.346291
##
## (Intercept) *
## cur_structure_condition_c
## match_condition_c
## block_learn **
## block_trial_number_c ***
## cur_structure_condition_c:match_condition_c
## cur_structure_condition_c:block_learn ***
## match_condition_c:block_learn *
## cur_structure_condition_c:block_trial_number_c .
## match_condition_c:block_trial_number_c
## block_learn:block_trial_number_c ***
## cur_structure_condition_c:match_condition_c:block_learn *
## cur_structure_condition_c:match_condition_c:block_trial_number_c
## cur_structure_condition_c:block_learn:block_trial_number_c ***
## match_condition_c:block_learn:block_trial_number_c **
## cur_structure_condition_c:match_condition_c:block_learn:block_trial_number_c
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation matrix not shown by default, as p = 16 > 12.
## Use print(x, correlation=TRUE) or
## vcov(x) if you need it
# Recenter the model on the generalization block
# Same model dummy-coded at the generalization block (block_gen = 0 there),
# so lower-order terms are simple effects WITHIN the generalization block.
m <- glmer(max_reward_choice ~ cur_structure_condition_c*match_condition_c*block_gen*block_trial_number_c+ (1+block_gen*block_trial_number_c|subject)+(1|choiceImage),data=d, family=binomial,glmerControl(optimizer="bobyqa",optCtrl=list(maxfun=100000)))
summary(m)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: max_reward_choice ~ cur_structure_condition_c * match_condition_c *
## block_gen * block_trial_number_c + (1 + block_gen * block_trial_number_c |
## subject) + (1 | choiceImage)
## Data: d
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))
##
## AIC BIC logLik deviance df.resid
## 24336.0 24561.4 -12141.0 24282.0 31173
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -22.0159 -0.3824 0.0179 0.3535 12.0665
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## subject (Intercept) 7.30122 2.70208
## block_gen 10.76550 3.28108 0.69
## block_trial_number_c 0.00982 0.09909 0.88 0.60
## block_gen:block_trial_number_c 0.01066 0.10322 0.64 0.78 0.77
## choiceImage (Intercept) 6.85555 2.61831
## Number of obs: 31200, groups: subject, 325; choiceImage, 248
##
## Fixed effects:
## Estimate
## (Intercept) 0.071274
## cur_structure_condition_c -1.711584
## match_condition_c 1.113802
## block_gen 0.520777
## block_trial_number_c 0.085488
## cur_structure_condition_c:match_condition_c -1.263288
## cur_structure_condition_c:block_gen -1.580528
## match_condition_c:block_gen 0.961643
## cur_structure_condition_c:block_trial_number_c -0.069534
## match_condition_c:block_trial_number_c 0.032863
## block_gen:block_trial_number_c 0.027432
## cur_structure_condition_c:match_condition_c:block_gen -1.835375
## cur_structure_condition_c:match_condition_c:block_trial_number_c -0.031562
## cur_structure_condition_c:block_gen:block_trial_number_c -0.054514
## match_condition_c:block_gen:block_trial_number_c 0.036994
## cur_structure_condition_c:match_condition_c:block_gen:block_trial_number_c -0.028462
## Std. Error
## (Intercept) 0.234404
## cur_structure_condition_c 0.315205
## match_condition_c 0.314173
## block_gen 0.195437
## block_trial_number_c 0.006460
## cur_structure_condition_c:match_condition_c 0.625659
## cur_structure_condition_c:block_gen 0.418940
## match_condition_c:block_gen 0.381081
## cur_structure_condition_c:block_trial_number_c 0.012520
## match_condition_c:block_trial_number_c 0.012439
## block_gen:block_trial_number_c 0.007054
## cur_structure_condition_c:match_condition_c:block_gen 0.832358
## cur_structure_condition_c:match_condition_c:block_trial_number_c 0.024747
## cur_structure_condition_c:block_gen:block_trial_number_c 0.015282
## match_condition_c:block_gen:block_trial_number_c 0.013542
## cur_structure_condition_c:match_condition_c:block_gen:block_trial_number_c 0.030295
## z value
## (Intercept) 0.304
## cur_structure_condition_c -5.430
## match_condition_c 3.545
## block_gen 2.665
## block_trial_number_c 13.233
## cur_structure_condition_c:match_condition_c -2.019
## cur_structure_condition_c:block_gen -3.773
## match_condition_c:block_gen 2.523
## cur_structure_condition_c:block_trial_number_c -5.554
## match_condition_c:block_trial_number_c 2.642
## block_gen:block_trial_number_c 3.889
## cur_structure_condition_c:match_condition_c:block_gen -2.205
## cur_structure_condition_c:match_condition_c:block_trial_number_c -1.275
## cur_structure_condition_c:block_gen:block_trial_number_c -3.567
## match_condition_c:block_gen:block_trial_number_c 2.732
## cur_structure_condition_c:match_condition_c:block_gen:block_trial_number_c -0.940
## Pr(>|z|)
## (Intercept) 0.761078
## cur_structure_condition_c 5.63e-08
## match_condition_c 0.000392
## block_gen 0.007706
## block_trial_number_c < 2e-16
## cur_structure_condition_c:match_condition_c 0.043474
## cur_structure_condition_c:block_gen 0.000162
## match_condition_c:block_gen 0.011621
## cur_structure_condition_c:block_trial_number_c 2.79e-08
## match_condition_c:block_trial_number_c 0.008240
## block_gen:block_trial_number_c 0.000101
## cur_structure_condition_c:match_condition_c:block_gen 0.027452
## cur_structure_condition_c:match_condition_c:block_trial_number_c 0.202184
## cur_structure_condition_c:block_gen:block_trial_number_c 0.000361
## match_condition_c:block_gen:block_trial_number_c 0.006298
## cur_structure_condition_c:match_condition_c:block_gen:block_trial_number_c 0.347472
##
## (Intercept)
## cur_structure_condition_c ***
## match_condition_c ***
## block_gen **
## block_trial_number_c ***
## cur_structure_condition_c:match_condition_c *
## cur_structure_condition_c:block_gen ***
## match_condition_c:block_gen *
## cur_structure_condition_c:block_trial_number_c ***
## match_condition_c:block_trial_number_c **
## block_gen:block_trial_number_c ***
## cur_structure_condition_c:match_condition_c:block_gen *
## cur_structure_condition_c:match_condition_c:block_trial_number_c
## cur_structure_condition_c:block_gen:block_trial_number_c ***
## match_condition_c:block_gen:block_trial_number_c **
## cur_structure_condition_c:match_condition_c:block_gen:block_trial_number_c
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation matrix not shown by default, as p = 16 > 12.
## Use print(x, correlation=TRUE) or
## vcov(x) if you need it
# Not evaluated for spark-psychopathology
# Trying to sort out how to look at individual conditions and blocks
# Exploratory follow-up models (default optimizer, no extra control):
# structure effect collapsed over the other factors.
m <- glmer(max_reward_choice ~ cur_structure_condition_c*block_trial_number_c+ (1+block_trial_number_c|subject)+(1|choiceImage),data=d, family=binomial)
summary(m)
# Match-condition effect and its change across blocks.
m <- glmer(max_reward_choice ~ match_condition_c*block_trial_number_c*block_c+ (1+block_trial_number_c|subject)+(1|choiceImage),data=d, family=binomial)
summary(m)
# Match effect within the generalization block only.
m <- glmer(max_reward_choice ~ match_condition_c*block_trial_number_c+ (1+block_trial_number_c|subject)+(1|choiceImage),data=filter(d,block==2), family=binomial)
summary(m)
# Match effect within the learning block only (conditions are identical
# there, so any effect is unexpected — see note).
m <- glmer(max_reward_choice ~ match_condition_c*block_trial_number_c+ (1+block_trial_number_c|subject)+(1|choiceImage),data=filter(d,block==1), family=binomial)
summary(m)#not really sure why there should be a match effect here... Noise?
## learning block
# m <- glmer(max_reward_choice ~ structure_condition_c*block_trial_number_c+ (1+block_trial_number_c|subject)+(1|choiceImage),data=filter(d,block==1), family=binomial)
# summary(m)
#
# # re-run so I don't have to re-label things
# m <- glmer(max_reward_choice ~ structure_condition*block_trial_number+ (1+block_trial_number|subject)+(1|choiceImage),data=filter(d,block==1), family=binomial)
# summary(m)
#
# sjPlot::plot_model(m, type = "pred", terms = c("block_trial_number", "structure_condition"),
# show.data = T,
# jitter = .01,
# title = "",
# axis.title = c("Trial", "Reward Maximizing Choices"),
# legend.title = "Structure Condition",
# colors = c( "orange3", "green4"),
# auto.label = FALSE)+
# theme_classic(base_size = 14, base_family = "")
# overall (same model as in section above)
m <- glmer(max_reward_choice ~ cur_structure_condition_c*block_trial_number_c+ (1+block_trial_number_c|subject)+(1|choiceImage),data=d, family=binomial)
summary(m)
# Structure effect within the generalization block only.
m <- glmer(max_reward_choice ~ cur_structure_condition_c*block_trial_number_c+ (1+block_trial_number_c|subject)+(1|choiceImage),data=filter(d,block==2), family=binomial)
summary(m)
# generalization block
# Full structure x match x trial model in the generalization block.
m <- glmer(max_reward_choice ~ cur_structure_condition_c*match_condition_c*block_trial_number_c+ (1+block_trial_number_c|subject)+(1|choiceImage),data=filter(d,block==2), family=binomial,glmerControl(optimizer="bobyqa",optCtrl=list(maxfun=20000)))
summary(m)
# Marginal predicted probabilities from the model above.
sjPlot::plot_model(m, type = "pred", terms = c("block_trial_number_c", "match_condition_c","cur_structure_condition_c"),
jitter = .01,
title = "Structure Condition",
axis.title = c("Trial", "Reward Maximizing Choices"),
legend.title = "Match Condition",
colors = c( "firebrick", "darkblue")) +
theme_classic(base_size = 14, base_family = "")